From 4ca687891054b6f8f3b56349cdff57aa4838a103 Mon Sep 17 00:00:00 2001 From: "iap10@labyrinth.cl.cam.ac.uk" Date: Wed, 2 Feb 2005 12:43:34 +0000 Subject: [PATCH] bitkeeper revision 1.1159.238.2 (4200caf6iFnj85XmiFNAz7VursMGUw) Slab caches for things allocated only on initialization seems to be overkill. This patch replaces them with the previous typesafe allocator. Signed-off-by: Rusty Russell (authored) Signed-off-by: ian.pratt@cl.cam.ac.uk --- xen/arch/x86/domain.c | 26 ++++---------------------- xen/arch/x86/setup.c | 2 -- xen/common/sched_atropos.c | 8 +------- xen/common/sched_bvt.c | 14 ++------------ xen/common/sched_rrobin.c | 15 ++------------- 5 files changed, 9 insertions(+), 56 deletions(-) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 6270c09985..be9fd82f00 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -223,42 +223,24 @@ void dump_pageframe_info(struct domain *d) page->u.inuse.type_info); } -xmem_cache_t *domain_struct_cachep; -xmem_cache_t *exec_domain_struct_cachep; - -void __init domain_startofday(void) -{ - domain_struct_cachep = xmem_cache_create( - "domain_cache", sizeof(struct domain), - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); - if ( domain_struct_cachep == NULL ) - panic("No slab cache for domain structs."); - - exec_domain_struct_cachep = xmem_cache_create( - "exec_dom_cache", sizeof(struct exec_domain), - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); - if ( exec_domain_struct_cachep == NULL ) - BUG(); -} - struct domain *arch_alloc_domain_struct(void) { - return xmem_cache_alloc(domain_struct_cachep); + return xmalloc(struct domain); } void arch_free_domain_struct(struct domain *d) { - xmem_cache_free(domain_struct_cachep, d); + xfree(d); } struct exec_domain *arch_alloc_exec_domain_struct(void) { - return xmem_cache_alloc(exec_domain_struct_cachep); + return xmalloc(struct exec_domain); } void arch_free_exec_domain_struct(struct exec_domain *ed) { - xmem_cache_free(exec_domain_struct_cachep, ed); + xfree(ed); } void 
free_perdomain_pt(struct domain *d) diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c index 5bf8149168..f38fc62ff4 100644 --- a/xen/arch/x86/setup.c +++ b/xen/arch/x86/setup.c @@ -602,8 +602,6 @@ void __init __start_xen(multiboot_info_t *mbi) xmem_cache_init(); xmem_cache_sizes_init(max_page); - domain_startofday(); - start_of_day(); grant_table_init(); diff --git a/xen/common/sched_atropos.c b/xen/common/sched_atropos.c index 32c0090fbb..ab84efa3ce 100644 --- a/xen/common/sched_atropos.c +++ b/xen/common/sched_atropos.c @@ -69,8 +69,6 @@ struct at_cpu_info static void at_dump_cpu_state(int cpu); -static xmem_cache_t *dom_info_cache; - static inline void __add_to_runqueue_head(struct domain *d) { list_add(RUNLIST(d), RUNQ(d->processor)); @@ -558,10 +556,6 @@ static int at_init_scheduler() INIT_LIST_HEAD(RUNQ(i)); } - dom_info_cache = xmem_cache_create("Atropos dom info", - sizeof(struct at_dom_info), - 0, 0, NULL, NULL); - return 0; } @@ -649,7 +643,7 @@ static int at_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd) /* free memory associated with a task */ static void at_free_task(struct domain *p) { - xmem_cache_free( dom_info_cache, DOM_INFO(p) ); + xfree( DOM_INFO(p) ); } diff --git a/xen/common/sched_bvt.c b/xen/common/sched_bvt.c index 59b70ecfc6..12c930ac0d 100644 --- a/xen/common/sched_bvt.c +++ b/xen/common/sched_bvt.c @@ -71,8 +71,6 @@ struct bvt_cpu_info #define TIME_SLOP (s32)MICROSECS(50) /* allow time to slip a bit */ static s32 ctx_allow = (s32)MILLISECS(5); /* context switch allowance */ -static xmem_cache_t *dom_info_cache; - static inline void __add_to_runqueue_head(struct exec_domain *d) { list_add(RUNLIST(d), RUNQUEUE(d->processor)); @@ -173,7 +171,7 @@ int bvt_alloc_task(struct exec_domain *ed) { struct domain *d = ed->domain; if ( (d->sched_priv == NULL) ) { - if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL ) + if ( (d->sched_priv = new(struct bvt_dom_info)) == NULL ) return -1; memset(d->sched_priv, 0, 
sizeof(struct bvt_dom_info)); } @@ -295,7 +293,7 @@ static void bvt_sleep(struct exec_domain *d) void bvt_free_task(struct domain *d) { ASSERT(d->sched_priv != NULL); - xmem_cache_free(dom_info_cache, d->sched_priv); + xfree(d->sched_priv); } /* Control the scheduler. */ @@ -570,14 +568,6 @@ int bvt_init_scheduler() CPU_SVT(i) = 0; /* XXX do I really need to do this? */ } - dom_info_cache = xmem_cache_create( - "BVT dom info", sizeof(struct bvt_dom_info), 0, 0, NULL, NULL); - if ( dom_info_cache == NULL ) - { - printk("BVT: Failed to allocate domain info SLAB cache"); - return -1; - } - return 0; } diff --git a/xen/common/sched_rrobin.c b/xen/common/sched_rrobin.c index 2164ce22b4..73d03bc787 100644 --- a/xen/common/sched_rrobin.c +++ b/xen/common/sched_rrobin.c @@ -27,8 +27,6 @@ struct rrobin_dom_info #define RUNLIST(d) ((struct list_head *)&(RR_INFO(d)->run_list)) #define RUNQUEUE(cpu) RUNLIST(schedule_data[cpu].idle) -static xmem_cache_t *dom_info_cache; - static inline void __add_to_runqueue_head(struct domain *d) { list_add(RUNLIST(d), RUNQUEUE(d->processor)); @@ -59,21 +57,12 @@ static int rr_init_scheduler() for ( i = 0; i < NR_CPUS; i++ ) INIT_LIST_HEAD(RUNQUEUE(i)); - dom_info_cache = xmem_cache_create( - "RR dom info", sizeof(struct rrobin_dom_info), 0, 0, 0, NULL); - if ( dom_info_cache == NULL ) - { - printk("Could not allocate SLAB cache.\n"); - return -1; - } - return 0; } - /* Allocates memory for per domain private scheduling data*/ static int rr_alloc_task(struct domain *d) { - if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL ) + if ( (d->sched_priv = new(struct rrobin_dom_info)) == NULL ) return -1; memset(d->sched_priv, 0, sizeof(struct rrobin_dom_info)); return 0; @@ -91,7 +80,7 @@ static void rr_add_task(struct domain *d) static void rr_free_task(struct domain *d) { ASSERT(d->sched_priv != NULL); - xmem_cache_free(dom_info_cache, d->sched_priv); + xfree(d->sched_priv); } /* Initialises idle task */ -- 2.30.2